# STAT 554 Final Project
# Setting the working directory
# NOTE(review): a hard-coded setwd() makes the script machine-specific;
# consider running R from the project directory instead.
setwd("~/Desktop/R/stat_554_project")
# Setting the projection, for future use (currently unused)
# proj <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84")
########################################################################
# Loading Required packages
library(sp)
library(splancs)
## 
## Spatial Point Pattern Analysis Code in S-Plus
##  
##  Version 2 - Spatial and Space-Time analysis
library(maps)
library(shapefiles)
## Loading required package: foreign
## 
## Attaching package: 'shapefiles'
## The following objects are masked from 'package:foreign':
## 
##     read.dbf, write.dbf
library(maptools)
## Checking rgeos availability: TRUE
library(raster)
## 
## Attaching package: 'raster'
## The following object is masked from 'package:splancs':
## 
##     zoom
library(geoR)
## --------------------------------------------------------------
##  Analysis of Geostatistical Data
##  For an Introduction to geoR go to http://www.leg.ufpr.br/geoR
##  geoR version 1.7-5.2 (built on 2016-05-02) is now loaded
## --------------------------------------------------------------
library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:raster':
## 
##     intersect, select, union
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
#Load the dataset, and transfer the census tract into the correct format

#########################################################################
# Crime-file loading helpers (replaces six hand-written copies of the
# same read/clean/tabulate code)
#########################################################################

# Read one crime CSV and derive a numeric census-tract id `ct`.
# Census.Tract labels are coerced to numeric (non-numeric labels become NA
# with a coercion warning, exactly as in the original script), truncated to
# an integer, then divided by 100 so e.g. tract "101.xx" becomes 1.01 to
# match the demographic data's two-digit tract format.
prep_crime <- function(path) {
  crime <- read.csv(file = path, header = TRUE)
  crime$ct <- as.numeric(as.character(crime$Census.Tract))
  crime$ct <- trunc(crime$ct) / 100
  crime$control <- 1  # kept for compatibility with the original data frames
  crime
}

# Tabulate incident counts per tract; `count_col` names the count column.
count_by_tract <- function(crime, count_col) {
  counts <- as.data.frame(table(crime$ct))
  names(counts) <- c("tract", count_col)
  counts
}

robbery <- prep_crime("sea_rob.csv")
robbery.df <- count_by_tract(robbery, "rob.n")
homicide <- prep_crime("sea_homi.csv")
homicide.df <- count_by_tract(homicide, "hom.n")
assault <- prep_crime("sea_ass.csv")
assault.df <- count_by_tract(assault, "ass.n")
burglary <- prep_crime("sea_burg.csv")
burglary.df <- count_by_tract(burglary, "burg.n")
damage <- prep_crime("sea_prop.csv")
damage.df <- count_by_tract(damage, "pdam.n")
shoplifting <- prep_crime("sea_shop.csv")
shoplifting.df <- count_by_tract(shoplifting, "shop.n")
# Combine the six per-crime count tables into one table, one row per tract.
# all=TRUE keeps tracts that appear in only some of the crime files.
df1<-merge(homicide.df,robbery.df, by = "tract", all=TRUE)
df2<-merge(assault.df, burglary.df, by = "tract", all=TRUE)
df3<-merge(shoplifting.df, damage.df, by ="tract", all=TRUE)
df4<-merge(df1,df2, by ="tract", all=TRUE)
tot<-merge(df3,df4, by ="tract", all=TRUE)
# Drop the intermediate tables
rm(df1,df2,df3,df4,assault.df,burglary.df,homicide.df,
   robbery.df,damage.df,shoplifting.df)
tot$tract<-as.numeric(as.character(tot$tract))
# A tract absent from a crime file had no incidents of that type: count = 0
tot[is.na(tot)] <- 0
tot<-arrange(tot,tract)
# The rows below split the counts of certain tracts evenly between the two
# adjacent rows, then drop the original row (apparently tract ids that do
# not match the demographic data, e.g. split census tracts).
# NOTE(review): these hard-coded row positions depend on the exact contents
# and sort order of `tot`; they will silently corrupt the data if the input
# files change. TODO: key this on tract ids instead of row numbers.
tot[19,2:7]<- tot[19,2:7]+(tot[18,2:7]/2)
tot[20,2:7]<- tot[20,2:7]+(tot[18,2:7]/2)
tot[45,2:7]<- tot[45,2:7]+(tot[44,2:7]/2)
tot[46,2:7]<- tot[46,2:7]+(tot[44,2:7]/2)
tot[79,2:7]<- tot[79,2:7]+(tot[78,2:7]/2)
tot[80,2:7]<- tot[80,2:7]+(tot[78,2:7]/2)
tot[109,2:7]<- tot[109,2:7]+(tot[108,2:7]/2)
tot[110,2:7]<- tot[110,2:7]+(tot[108,2:7]/2)
tot[115,2:7]<- tot[115,2:7]+(tot[114,2:7]/2)
tot[116,2:7]<- tot[116,2:7]+(tot[114,2:7]/2)
tot[125,2:7]<- tot[125,2:7]+(tot[124,2:7]/2)
tot[126,2:7]<- tot[126,2:7]+(tot[124,2:7]/2)
tot[132,2:7]<- tot[132,2:7]+(tot[131,2:7]/2)
tot[133,2:7]<- tot[133,2:7]+(tot[131,2:7]/2)
tot<-tot[-c(18,44,78,108,114,124,131),]
tot<-arrange(tot,tract)
# Two trailing rows are removed, then one more split tract is handled after
# re-sorting. TODO(review): confirm which tracts rows 134/135 were.
tot<-tot[-c(134,135),]
tot[115,2:7]<- tot[115,2:7]+(tot[114,2:7]/2)
tot[116,2:7]<- tot[116,2:7]+(tot[114,2:7]/2)
tot<-tot[-c(114),]
tot<-arrange(tot,tract)
#############################################################################################
# Finish the messy part
#############################################################################
# Loading the demographic data
demo <- read.csv(file = "sea_demog.csv", header = TRUE)

# Match the crime data and demographic data by tract, calling the
# combined dataset 'merged'
merged <- merge(demo, tot, by = "tract", all = TRUE)


##############################################################################
# Crime rates per 1000 residents, one column per crime type, plus totals.
# (Loop replaces six identical hand-written assignments; column order is
# the same as before: hom.p, rob.p, ass.p, pdam.p, shop.p, burg.p.)
for (crime in c("hom", "rob", "ass", "pdam", "shop", "burg")) {
  merged[[paste0(crime, ".p")]] <-
    merged[[paste0(crime, ".n")]] / merged$pop * 1000
}
merged$tot.n <- merged$hom.n + merged$rob.n + merged$ass.n +
  merged$pdam.n + merged$shop.n + merged$burg.n
merged$tot.p <- merged$tot.n / merged$pop * 1000
################################################################################
################################################################################

## Read the spatial data
# NOTE(review): readShapePoly() (maptools) is deprecated in favour of
# rgdal::readOGR() / sf::st_read(), and it attaches no CRS to the result —
# confirm the shapefile's projection before any distance-based work.
seattle <- readShapePoly("seattle.shp")
class(seattle)
## [1] "SpatialPolygonsDataFrame"
## attr(,"package")
## [1] "sp"
plot(seattle)

# Edit a variable ct in the seattle shapefile to let the census tract match the two digits
# format in the crime data
seattle$NAME10<-as.numeric(as.character(seattle$NAME10))
seattle$NAME10<-round(seattle$NAME10,digits = 2)
seattle$tract<-seattle$NAME10
head(seattle$tract)
## [1] 8.00 7.00 6.00 5.00 4.02 4.01
#########################################################################
# Observed counts (Y1..Y6) and internally standardized expected counts
# (E1..E6) for each crime type; expected counts are proportional to tract
# population so that sum(E) == sum(Y).  The vectors are also kept as
# free-standing objects because later model code refers to Y1/E1 etc.
# directly.  (Loop replaces six identical hand-written stanzas.)
crime_cols <- c("rob.n", "burg.n", "shop.n", "pdam.n", "ass.n", "hom.n")
for (i in seq_along(crime_cols)) {
  Y <- merged[[crime_cols[i]]]
  E <- sum(Y) * merged$pop / sum(merged$pop)
  merged[[paste0("Y", i)]] <- Y
  merged[[paste0("E", i)]] <- E
  assign(paste0("Y", i), Y)  # keep Y1..Y6 / E1..E6 in the global workspace
  assign(paste0("E", i), E)
}

# Calculating the SMR (observed / expected), treating each crime incident
# like a "case" in the usual disease-mapping setup.
for (i in seq_along(crime_cols)) {
  merged[[paste0("smr", i)]] <-
    merged[[paste0("Y", i)]] / merged[[paste0("E", i)]]
}


# Demographic covariates coerced to numeric (poverty, no-high-school,
# heterogeneity).  One pov value fails coercion and is imputed by hand.
pov<- as.numeric(as.character(merged$pov))
## Warning: NAs introduced by coercion
# NOTE(review): hard-coded positional imputation — presumably 19.000 was
# looked up manually for row 55; verify against the source demographic data,
# and note the index breaks if `merged` row order changes.
pov[55]=19.000
nohs<-as.numeric(as.character(merged$nohs))
het<- as.numeric(as.character(merged$het))
#########################################################################

# Finally, we have the SPDF file with everything we want!
# match() keeps the shapefile's polygon order while attaching the tract-level
# crime and demographic columns to seattle@data.
seattle@data = data.frame(seattle@data, merged[match(seattle@data[,"NAME10"], merged[,"tract"]),])

#########################################################################
# Part 2. Let us see the clustering of census tract crime data
#########################################################################
# Transfer Longitude and latitude into UTM
oldCRS<-data.frame(seattle$INTPTLON10, seattle$INTPTLAT10)
# NOTE(review): this assumes the rows of utm_seattle.csv are in the same
# order as the polygons in `seattle` — TODO confirm.
newCRS<-read.csv(file="utm_seattle.csv",header=TRUE) # Upload the converter file
seattle$northing<- newCRS$northing
seattle$easting<- newCRS$easting
# Load the required package, and see difference of census tracts
# in crime rate.
library(SpatialEpi)
library(spdep)
## Loading required package: Matrix
# Queen-contiguity neighbour list from the polygon boundaries
nb_q<-poly2nb(seattle)
# Choropleth maps of the rates; break points roughly double at each step
brks<-c(0,20,40,80,160,320,640,1280,2560)
spplot(seattle,zcol="tot.p",at = brks)

brks3<-c(0,2,6,12,24,40,60)
spplot(seattle,zcol="rob.p",at=brks3)

brks2<-c(0,2,6,12,24,40,60,120,240,480,960)
spplot(seattle,zcol="ass.p", at=brks2)

spplot(seattle,zcol="burg.p", at=brks3)

hist(seattle$tot.p, breaks=200, xlim=c(0,1200),
     col="lightblue",main="Seattle Crime Rate in Census Tract",
     xlab="Crime Rate (Counts per 1000)",freq=TRUE)

# Plot the contiguity graph over the polygon centroids
coords<-coordinates(seattle)
plot(nb_q,coords, col="pink")

# Monte Carlo Test for Overdispersion
# Pearson overdispersion statistic: sum of squared Pearson residuals
# (Y - fitted)^2 / fitted, divided by the residual degrees of freedom.
kappaval <- function(Y, fitted, df) {
  pearson_sq <- (Y - fitted)^2 / fitted
  sum(pearson_sq) / df
}
# Monte Carlo test for overdispersion: compare the observed Pearson
# overdispersion statistic (kappaest) against its null distribution under
# independent Poisson sampling, Y_i ~ Poisson(E_i).
# FIXES: full component names (the original relied on partial matching via
# $fitted and $df.resid), family object instead of a string called as a
# function, and a preallocated result vector.
mod <- glm(Y1 ~ 1, offset = log(E1), family = quasipoisson(link = "log"))
kappaest <- kappaval(Y1, mod$fitted.values, mod$df.residual)
nMC <- 1000
ncts <- length(E1)
# nMC replicate count vectors simulated under the null (no excess variation)
yMC <- matrix(rpois(n = nMC * ncts, lambda = E1),
              nrow = ncts, ncol = nMC)
kappaMC <- numeric(nMC)  # preallocate rather than growing inside the loop
for (i in seq_len(nMC)) {
  modMC <- glm(yMC[, i] ~ 1, offset = log(E1), data = seattle,
               family = quasipoisson())
  kappaMC[i] <- kappaval(yMC[, i], modMC$fitted.values, modMC$df.residual)
}
# Monte Carlo p-value: large kappaest relative to the null => overdispersion
kappa_pval <- (1 + sum(kappaMC >= kappaest)) / (nMC + 1)
summary(modMC)   # summary of the last simulated fit (illustrative only)
# Null distribution of kappa; the frozen run showed mean ~6.1, max ~8.1
summary(kappaMC)
#########################################################################
# Using the poly2nb to create neighborhoods for seattle
library(classInt)
library(RColorBrewer)
library(maps)
library(SpatialEpi)
library(maptools)
sea.nb <-poly2nb(seattle)
# Three spatial weight styles (see spdep::nb2listw): W = row-standardised,
# B = binary, S = variance-stabilising.  zero.policy allows islands.
col.W<-nb2listw(sea.nb, style="W", zero.policy = TRUE)
col.B<-nb2listw(sea.nb, style="B", zero.policy = TRUE)
col.S <- nb2listw(sea.nb, style="S", zero.policy=TRUE)

#########################################################################
# Intercept-only quasi-Poisson fits of observed counts against the expected
# counts (log offset); the Pearson residuals feed the spatial
# autocorrelation tests below.
mod1 <- glm(Y1 ~ 1 + offset(log(E1)), family = quasipoisson())
residual1 <- residuals(mod1, type = "pearson")  # robbery

mod2 <- glm(Y2 ~ 1 + offset(log(E2)), family = quasipoisson())
residual2 <- residuals(mod2, type = "pearson")  # burglary

mod3 <- glm(Y3 ~ 1 + offset(log(E3)), family = quasipoisson())
residual3 <- residuals(mod3, type = "pearson")  # shoplifting

mod4 <- glm(Y4 ~ 1 + offset(log(E4)), family = quasipoisson())
residual4 <- residuals(mod4, type = "pearson")  # property damage

# BUG FIX: the original fit Y1 with offset log(E5); assault is Y5.
mod5 <- glm(Y5 ~ 1 + offset(log(E5)), family = quasipoisson())
residual5 <- residuals(mod5, type = "pearson")  # assault

# BUG FIX: the original fit Y2 with offset log(E6); homicide is Y6.
mod6 <- glm(Y6 ~ 1 + offset(log(E6)), family = quasipoisson())
residual6 <- residuals(mod6, type = "pearson")  # homicide



# Moran's I and Geary's C tests for spatial autocorrelation of the Pearson
# residuals, under each of the three weighting schemes (W, B, S).  The loops
# replace 36 hand-written copy-pasted test calls; output order is unchanged:
# all Moran tests (W, then B, then S; six crime types each), then all Geary
# tests in the same order.
# NOTE(review): in the original frozen output the residual5/residual6 results
# duplicated residual1/residual2 exactly — consistent with mod5/mod6 having
# been fit to Y1/Y2 (see their definitions above).
res_labels <- c("Robbery", "Burglary", "Shoplifting",
                "Property Damage", "Assault", "Homicide")
weight_sets <- list(W = col.W, B = col.B, S = col.S)
for (w in names(weight_sets)) {
  for (i in seq_along(res_labels)) {
    cat("Moran's I --", res_labels[i], "-- weights:", w, "\n")
    print(moran.test(get(paste0("residual", i)), weight_sets[[w]]))
  }
}
for (w in names(weight_sets)) {
  for (i in seq_along(res_labels)) {
    cat("Geary's C --", res_labels[i], "-- weights:", w, "\n")
    print(geary.test(get(paste0("residual", i)), weight_sets[[w]]))
  }
}
# Quasi-Poisson models with an easting/northing trend surface, one per crime
# type.  The loop replaces six identical hand-written stanzas and preserves
# all the original objects: quasipmod1..quasipmod6, sidsres1..sidsres6, and
# the mapped residual columns seattle$resB1..resB6.
for (i in 1:6) {
  Yi <- get(paste0("Y", i))
  Ei <- get(paste0("E", i))
  fit <- glm(Yi ~ easting + northing, offset = log(Ei),
             data = seattle, family = quasipoisson())
  assign(paste0("quasipmod", i), fit)
  res <- residuals(fit, type = "pearson")
  assign(paste0("sidsres", i), res)
  seattle[[paste0("resB", i)]] <- res
}


# NOTE(review): spplot() is lattice-based and ignores par(mfrow); these maps
# are drawn one at a time.  The 1x3 layout set here only affects the
# base-graphics plot of the tract index map further below.
par(mfrow=c(1,3))
spplot(seattle,"resB1")

spplot(seattle,"resB2")

spplot(seattle,"resB3")

spplot(seattle,"resB4")

# Robbery trend-surface model: the frozen output below shows a significant
# south-to-north gradient (northing) and no east-west gradient (easting).
summary(quasipmod1)
## 
## Call:
## glm(formula = Y1 ~ easting + northing, family = quasipoisson(), 
##     data = seattle, offset = log(E1))
## 
## Deviance Residuals: 
##    Min      1Q  Median      3Q     Max  
## -7.798  -3.090  -1.637   1.601  24.222  
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 8.574e+02  2.529e+02   3.390 0.000928 ***
## easting     1.601e-05  2.318e-05   0.691 0.490914    
## northing    7.388e-05  2.123e-05   3.479 0.000686 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for quasipoisson family taken to be 28.88898)
## 
##     Null deviance: 2669.6  on 131  degrees of freedom
## Residual deviance: 2331.6  on 129  degrees of freedom
## AIC: NA
## 
## Number of Fisher Scoring iterations: 5
# Index map of the tracts (text() with no labels defaults to row indices
# at the polygon centroids)
coords <- coordinates(seattle)
plot(seattle)
text(coords, cex = 0.75)

# Test of local influence (LISA): Moran scatterplot and local Moran's I
# for the robbery (resB1) and assault (resB5) trend-surface residuals.

par(mfrow=c(1,1))

lisa <- moran.plot(seattle$resB1, col.W, labels = seattle$OBJECTID,
                   xlab = "Residuals", ylab = "Spatially Lagged Residuals")

# localmoran() returns a matrix; column 1 is the local Moran statistic Ii
lm1 <- localmoran(seattle$resB1, col.W)
# Diverging palettes: reds/blues for robbery, oranges/greens for assault
gry <- c(rev(brewer.pal(8, "Reds")[1:6]), brewer.pal(6,
                                                     "Blues"))
seattle$localM1 <- lm1[, 1]
yellow <- c(rev(brewer.pal(8, "Oranges")[1:6]), brewer.pal(6,
                                                     "Greens"))
lm5 <- localmoran(seattle$resB5, col.W)
seattle$localM5 <- lm5[, 1]

# Map the local statistics with symmetric break points around 0
spplot(seattle, zcol = "localM1", at = c(-2.5, -1.4,
                                         -0.6, -0.2, 0, 0.2, 0.6, 1.4, 2.5), col.regions = colorRampPalette(gry)(8))

spplot(seattle, zcol = "localM5", at = c(-2.5, -1.4,
                                        -0.6, -0.2, 0, 0.2, 0.6, 1.4, 2.5), col.regions = colorRampPalette(yellow)(8))

####################################################################
# Part 3: Clustering and Smoothing
####################################################################
# NOTE(review): se1 = sqrt(Y1/E1) is sqrt(SMR), not the usual SMR standard
# error sqrt(Y1)/E1 (the latter is computed later as sesmr1) — confirm which
# was intended for these axes.
se1 <- sqrt(Y1/E1)
plot(Y1 ~ se1, xlab = "Standard Error", ylab = "Observed Robbery Rate",
     col = "blue", cex = 0.7, pch = 5, ylim=c(0,100))

se5 <- sqrt(Y5/E5)
plot(Y5 ~ se5, xlab = "Standard Error", ylab = "Observed Assault Rate",
     col = "pink", cex = 0.7, pch = 5, ylim=c(0,500),xlim=c(0,2.5))

# Gamma random effects model: Robbery Case
# (SpatialEpi::eBayes fits a Poisson-Gamma model to Y given E.)
# The non-integer warnings below come from the half-counts created by the
# split-tract redistribution earlier in the script.
ebresults <- eBayes(Y1, E1)
## Warning in dpois(y, mu, log = TRUE): non-integer x = 8.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 8.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 16.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 25.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 19.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 22.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 26.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 53.500000
ebresults$alpha #the estimate of alpha in our model
## [1] 1.093822
exp(ebresults$beta) 
## (Intercept) 
##    1.064666
# the RRs are greater in the study region than in
# the reference region
mean(Y1/E1)
## [1] 1.06977
summary(ebresults$RR)
##     Min.  1st Qu.   Median     Mean  3rd Qu.     Max. 
##  0.08147  0.32590  0.64040  1.06500  1.35300 10.23000
# Now let's examine the gamma assumption via a QQ-style plot of observed
# order statistics against Gamma(alpha, alpha) quantiles
egamma <- qgamma(seq(0.5, length(Y1), 1)/length(Y1),
                 ebresults$alpha, ebresults$alpha)
par(mfrow = c(1, 2))
# First plot is the estimates from the gamma model
plot(egamma, exp(-ebresults$beta) * sort(Y1/E1), xlim = c(0,5), 
     ylim = c(0, 5), pch= 3, col="pink", 
     xlab = "Exp Order Stat", ylab = "Obs Order Stat (SMR)")
abline(0, 1) #Y1/E1 here = SMR on health statistics...
# Second plot is the Robbery estimates
plot(egamma, exp(-ebresults$beta) * sort(ebresults$RR),
     xlim = c(0, 5), ylim = c(0, 5), xlab = "Exp Order Stat",
     ylab = "Obs Order Stat (Gamma)", pch=2,col="gray")
abline(0, 1)

# This looks pretty reasonable...

# Considers the Smoother Model now
# Below are "Zero Adjustment"...
Ystar1 <- ifelse(Y1 == 0, 0.5, Y1)
Estar1 <- ifelse(Y1 == 0, E1 + 0.5, E1)
SMRstar1 <- Ystar1/Estar1
alphastar1 <- log(SMRstar1)
varalphastar1 <- 1/(SMRstar1 * Estar1)
SMRlower1 <- exp(alphastar1 - 1.96 * sqrt(varalphastar1))
SMRupper1 <- exp(alphastar1 + 1.96 * sqrt(varalphastar1))
SMRwidth1 <- SMRupper1 - SMRlower1

smr1<-Y1/E1
sesmr1 <- sqrt(smr1/E1)
seEBests <- sqrt((ebresults$beta+Y1)*exp(2*ebresults$beta)/
                   (ebresults$alpha+E1*ebresults$beta)^2)
par(mfrow=c(1,2))
plot(SMRstar1,ebresults$RR,xlab="SMR",ylab="EB estimates",
     xlim=c(0,max(SMRstar1)),ylim=c(0,max(SMRstar1)),pch=6,col="purple")
abline(0,1)
plot(log(SMRstar1),log(ebresults$RR),xlim=c(-3,3),
     ylim=c(-3,3),xlab="log(SMR)",ylab="log(EB estimates)",pch=5,col="green")
abline(0,1)

# The shrunk in EB Estimates is pretty obvious
seEBests <- sqrt((ebresults$alpha+Y1)*exp(2*ebresults$beta)/
                   (ebresults$alpha+E1*ebresults$beta)^2)
par(mfrow=c(1,2))
plot(se1,smr1,ylim=c(0,max(smr1)),xlim=c(0,3),
     xlab="SE SMR",ylab="SMR",pch=8,col="pink")
plot(seEBests,ebresults$RR,ylim=c(0,max(smr1)),
     xlim=c(0,7),xlab="SE Emp Bayes",ylab="Emp Bayes",pch=7,col="orange")

par(mfrow=c(1,1))
plot(seEBests ~ se1, ylab = "SE Emp Bayes", xlab = "SE SMR",
     xlim = c(0, 4), ylim = c(0, 8), pch = 1, col = "purple")
abline(0, 1, col = "green")

# Uncertainty for the EB estimates: the relative-risk posterior is
# Gamma(shape = alpha + Y, rate = (alpha + E*exp(beta))/exp(beta)).
apost1 <- ebresults$alpha + Y1
bpost1 <- (ebresults$alpha + E1 * exp(ebresults$beta)) / exp(ebresults$beta)
# Exact 95% posterior interval from the gamma quantiles
EBlower1 <- qgamma(p = 0.025, shape = apost1, rate = bpost1)
EBupper1 <- qgamma(p = 0.975, shape = apost1, rate = bpost1)
EBwidth1 <- EBupper1 - EBlower1

# Interval comparison against the zero-adjusted SMR intervals
par(mfrow = c(1, 3))
plot(EBlower1 ~ SMRlower1, col = "orange")
abline(0, 1, col = "blue")
plot(EBupper1 ~ SMRupper1, col = "orange")
abline(0, 1, col = "blue")
plot(EBwidth1 ~ SMRwidth1, col = "orange")
abline(0, 1, col = "blue")

# Very obvious that EB gives the narrower intervals
####################################################################
# Poisson-gamma EB model with a covariate: poverty proportion (pov).
# NOTE(review): the warnings show non-integer counts (8.5, 16.5, ...),
# so Y1 apparently contains half-integer values -- confirm upstream.
ebresultsX <- eBayes(Y1,E1,pov)
## Warning in dpois(y, mu, log = TRUE): non-integer x = 8.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 8.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 16.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 25.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 19.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 22.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 26.500000
## Warning in dpois(y, mu, log = TRUE): non-integer x = 53.500000
# Overdispersion parameter with the covariate...
ebresultsX$alpha 
## [1] 2.014085
# ...versus without it:
ebresults$alpha
## [1] 1.093822
# note the reduction in excess-Poisson variation
# compared to the no covariate model
# Regression coefficients on the log-relative-risk scale
ebresultsX$beta
## (Intercept)        Xmat 
## -1.22682904  0.09396554
# FIX: the original re-ran `ebresultsX <- eBayes(Y1,E1,pov)` here -- an
# identical, redundant refit (with the same non-integer-count warnings);
# `ebresultsX` computed above is reused instead.
par(mfrow=c(1,1))
# SMR against poverty, with the fitted covariate curve exp(b0 + b1*pov)
plot(pov,smr1,type="n")
text(pov,smr1)
xval <- seq(0,max(pov),.01)
lines(xval,exp(ebresultsX$beta[1]+ebresultsX$beta[2]*xval))

# FIX: the original refit the same eBayes model here a third time;
# reusing the stored `ebresultsX` instead.
par(mfrow=c(1,1))
# Labelled version of the SMR-vs-poverty plot with the fitted curve
plot(pov,smr1,type="n",xlab="Proportion under Poverty",ylab="SMR Robbery")
text(pov,smr1,col="pink",cex=.4)
xval <- seq(0,max(pov),.01)
lines(xval,exp(ebresultsX$beta[1]+ebresultsX$beta[2]*xval),col="orange")

# 0.47/0.27 with pov, means variability explained by the covariate.

# Shrinkage plot: each area's estimate traced across three models
# (x = 0: raw SMR, x = 1: EB no covariate, x = 2: EB with poverty).
x0 <- rep(0,length(Y1))
x1 <- rep(1,length(Y1))
x2 <- rep(2,length(Y1))
plot(x0,smr1,xlim=c(0,2),type="n",
     ylab="Relative risks",
     xlab="",axes=FALSE)  # FIX: FALSE, not F (F is reassignable)
axis(2)
axis(1,at=c(0,1,2))
text(x0,smr1,cex=.5)
text(x1,ebresults$RR,cex=.5)
text(x2,ebresultsX$RR,cex=.5)
# Connect each area's three estimates; seq_along() is safe if Y1 is empty
for (i in seq_along(Y1)){
  lines(c(0,1,2),c(smr1[i],ebresults$RR[i],ebresultsX$RR[i]),
        lty=2,col="grey")
}
abline(1,0,col="red")  # reference line at RR = 1

# Lognormal Model
# We now consider an alternative lognormal model for the relative risks,
# but still independent. We specify the median (50\%) and
# 95\% points of the relative risk as 1 and 5 (probabilities 0.5 and 0.95 below).


# Choose lognormal (mu, sigma) so that P(RR < 1) = 0.5 and P(RR < 5) = 0.95
lnprior <- LogNormalPriorCh(1, 5, 0.5, 0.95)
lnprior
## $mu
## [1] 0
## 
## $sigma
## [1] 0.9784688
# Visualise the implied prior density on the relative-risk scale
theta_grid <- seq(0, 7, .1)
plot(theta_grid, dlnorm(theta_grid, meanlog = lnprior$mu, sdlog = lnprior$sigma),
     type = "l", xlab = expression(theta), ylab = "LogNormal Density")

library(INLA)
## This is INLA 0.0-1485844051, dated 2017-01-31 (09:14:12+0300).
## See www.r-inla.org/contact-us for how to get help.
library(rgdal)
## rgdal: version: 1.2-5, (SVN revision 648)
##  Geospatial Data Abstraction Library extensions to R successfully loaded
##  Loaded GDAL runtime: GDAL 2.1.2, released 2016/10/24
##  Path to GDAL shared files: /Library/Frameworks/R.framework/Versions/3.3/Resources/library/rgdal/gdal
##  Loaded PROJ.4 runtime: Rel. 4.9.1, 04 March 2015, [PJ_VERSION: 491]
##  Path to PROJ.4 shared files: /Library/Frameworks/R.framework/Versions/3.3/Resources/library/rgdal/proj
##  Linking to sp version: 1.2-3
library(RColorBrewer)

# SMR maps for the six crime variables, common breaks, one Brewer
# palette per crime type (same palettes and order as before).
smr_breaks <- c(0, 0.25, 0.5, 1, 2, 4, 7, 12, 20)
smr_palettes <- c(smr1 = "Blues",   smr2 = "PuRd",
                  smr3 = "Purples", smr4 = "Oranges",
                  smr5 = "PuBuGn",  smr6 = "GnBu")
for (v in names(smr_palettes)) {
  # lattice plots must be print()ed explicitly inside a loop
  print(spplot(seattle, c(v), at = smr_breaks,
               col.regions = (brewer.pal(8, smr_palettes[[v]]))))
}

# Lognormal relative-risk model via INLA: iid normal random effect per tract;
# param=c(1,0.026) sets the prior parameters for the random-effect precision.
# compute=T changed to the unabbreviated TRUE (T can be reassigned).
sea.fit1 <- inla(Y1 ~ 1 + f(tract, model ="iid", param=c(1,0.026)), 
                 data=merged, family="poisson", E=E1, 
                 control.predictor=list(compute=TRUE)) 
summary(sea.fit1)
## 
## Call:
## c("inla(formula = Y1 ~ 1 + f(tract, model = \"iid\", param = c(1, ",  "    0.026)), family = \"poisson\", data = merged, E = E1, control.predictor = list(compute = T))" )
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          0.8320          0.2202          0.0870          1.1392 
## 
## Fixed effects:
##                mean    sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept) -0.4322 0.092    -0.6151  -0.4315    -0.2532 -0.4301   0
## 
## Random effects:
## Name   Model
##  tract   IID model 
## 
## Model hyperparameters:
##                      mean     sd 0.025quant 0.5quant 0.975quant   mode
## Precision for tract 1.019 0.1329     0.7836    1.012      1.298 0.9955
## 
## Expected number of effective parameters(std dev): 119.68(1.241)
## Number of equivalent replicates : 1.103 
## 
## Marginal log-Likelihood:  -527.80 
## Posterior marginals for linear predictor and fitted values computed
# Write only the random-effects plot to postscript files (prefix "logmodplot1")
plot(sea.fit1, plot.hyperparameter=FALSE, 
     plot.random.effects=TRUE, plot.fixed.effects=FALSE, prefix="logmodplot1", postscript=T)
# Posterior summary of the intercept
sea.fit1$summary.fixed
##                   mean         sd 0.025quant   0.5quant 0.975quant
## (Intercept) -0.4321689 0.09202934 -0.6151154 -0.4314789 -0.2531982
##                   mode          kld
## (Intercept) -0.4300898 1.765568e-12
# Comparison of lognormal and gamma models
# First extract the posterior median of the intercept (beta0):
# column 4 of summary.fixed is "0.5quant" (see the printed output above)
lnorminter1 <- sea.fit1$summary.fixed[4]
# Now extract the medians of the random effects (column 5 = "0.5quant";
# per the original note these are centered around alpha)
lnormREs1 <- exp(sea.fit1$summary.random$tract[5])

# Lognormal RR estimates: exp(median beta0) * exp(median b_i)
lnormRRs1 <- as.double(exp(lnorminter1))*lnormREs1[,1]
# Scatter of gamma-EB vs lognormal RRs; points on the line = agreement
plot(ebresults$RR,lnormRRs1,xlim=c(0,4.5),ylim=c(0,4.5),
     xlab="Gamma RRs",ylab="Lognormal RRs")
abline(0,1)


# See the difference between the two smoothers on the map:
# gamma-EB RRs and lognormal RRs on a shared diverging palette.
rdbu_ramp <- colorRampPalette(rev(brewer.pal(8, "RdBu")))(50)

seattle$RRgam <- ebresults$RR
spplot(seattle, c("RRgam"), col.regions = rdbu_ramp)

seattle$RRlnorm1 <- lnormRRs1
spplot(seattle, c("RRlnorm1"), col.regions = rdbu_ramp)

## Uncertainty for the lognormal model: exponentiate the posterior
## quantiles of the linear predictor to get RR-scale 95% bounds.

lp1 <- sea.fit1$summary.linear.predictor
LNlower1 <- exp(lp1["0.025quant"])
LNupper1 <- exp(lp1["0.975quant"])
LNwidth1 <- LNupper1[,1] - LNlower1[,1]

# Compare lognormal interval endpoints and widths with the SMR ones
par(mfrow = c(1, 3))
plot(LNlower1[, 1] ~ SMRlower1, col = "cyan")
abline(0, 1, col = "pink")
plot(LNupper1[, 1] ~ SMRupper1, col = "cyan")
abline(0, 1, col = "pink")
plot(LNwidth1 ~ SMRwidth1, col = "cyan")
abline(0, 1, col = "pink")

## Lognormal model with covariates
# Factor-safe numeric coercion of poverty; the warning shows at least one
# value fails to parse -- NOTE(review): cf. the hand patch of seattle$pov[89]
# further below.
merged$pov<-as.numeric(as.character(merged$pov))
## Warning: NAs introduced by coercion
# Same iid lognormal model plus the poverty covariate
sea.fit1X <- inla(Y1 ~1+I(pov)+f(tract, model="iid", 
                                       param=c(1,0.014)),data=merged, family="poisson",E=E1)
summary(sea.fit1X)
## 
## Call:
## c("inla(formula = Y1 ~ 1 + I(pov) + f(tract, model = \"iid\", param = c(1, ",  "    0.014)), family = \"poisson\", data = merged, E = E1)")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          0.7041          0.1943          0.0698          0.9682 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept) -1.3774 0.1218    -1.6201  -1.3762    -1.1413 -1.3738   0
## I(pov)       0.0858 0.0085     0.0692   0.0858     0.1026  0.0857   0
## 
## Random effects:
## Name   Model
##  tract   IID model 
## 
## Model hyperparameters:
##                      mean     sd 0.025quant 0.5quant 0.975quant  mode
## Precision for tract 1.978 0.2726      1.498    1.961      2.553 1.925
## 
## Expected number of effective parameters(std dev): 111.22(2.07)
## Number of equivalent replicates : 1.187 
## 
## Marginal log-Likelihood:  -497.31
# Posterior summary of the poverty coefficient (log-RR scale)
sea.fit1X$summary.fixed[2,]
##              mean          sd 0.025quant   0.5quant 0.975quant       mode
## I(pov) 0.08580018 0.008504527  0.0692187 0.08575049  0.1026415 0.08565191
##                 kld
## I(pov) 1.675912e-12
# Exponentiated: RR multiplier per unit of pov.
# NOTE(review): exp() of the sd/kld columns is not meaningful; only the
# quantile/mean columns should be interpreted here.
exp(sea.fit1X$summary.fixed[2,])
##            mean       sd 0.025quant 0.5quant 0.975quant     mode kld
## I(pov) 1.089589 1.008541   1.071671 1.089534   1.108094 1.089427   1
# Full fixed-effects table (intercept + poverty)
sea.fit1X$summary.fixed
##                    mean          sd 0.025quant    0.5quant 0.975quant
## (Intercept) -1.37736242 0.121756727 -1.6201021 -1.37618344 -1.1413385
## I(pov)       0.08580018 0.008504527  0.0692187  0.08575049  0.1026415
##                    mode          kld
## (Intercept) -1.37381458 1.842294e-12
## I(pov)       0.08565191 1.675912e-12
## Lognormal spatial model with covariates

## We now add spatial (ICAR) random effects to the model.

## We need a graph file containing the neighbors.
nb2INLA("seasss.graph",sea.nb)


## For INLA, the region unique identifier must be INTEGER, so we
## temporarily create integer identifier variables here.
## FIX: derive the ids from the data instead of hard-coding c(1:132),
## so they always match nrow(merged) (132 regions per the nb object).
merged$region <- seq_len(nrow(merged))
merged$region2 <- merged$region
# Convolution-style model: iid ("region") + Besag ICAR ("region2") effects
sea.fit2 <- inla(Y1 ~ 1 + I(nohs) + 
                        f(region,model="iid",param=c(1,0.014)) + 
                        f(region2,model="besag",graph=
                            "seasss.graph",param=c(1,0.68)),data=merged,
                 family="poisson",E=E1,control.predictor=list(compute=TRUE))
# Posterior summaries for the spatial (iid + ICAR) model
summary(sea.fit2)
## 
## Call:
## c("inla(formula = Y1 ~ 1 + I(nohs) + f(region, model = \"iid\", param = c(1, ",  "    0.014)) + f(region2, model = \"besag\", graph = \"seasss.graph\", ",  "    param = c(1, 0.68)), family = \"poisson\", data = merged, E = E1, ",  "    control.predictor = list(compute = TRUE))")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          0.9671          0.8177          0.1174          1.9022 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept) -0.7763 0.0961    -0.9694  -0.7749    -0.5913 -0.7721   0
## I(nohs)      0.0503 0.0105     0.0298   0.0503     0.0709  0.0502   0
## 
## Random effects:
## Name   Model
##  region   IID model 
## region2   Besags ICAR model 
## 
## Model hyperparameters:
##                        mean     sd 0.025quant 0.5quant 0.975quant  mode
## Precision for region  2.867 0.7941     1.6142    2.765      4.704 2.573
## Precision for region2 1.392 0.6442     0.5341    1.262      2.998 1.040
## 
## Expected number of effective parameters(std dev): 112.69(1.90)
## Number of equivalent replicates : 1.171 
## 
## Marginal log-Likelihood:  -603.57 
## Posterior marginals for linear predictor and fitted values computed
# Exponentiated posterior medians (column 5 = "0.5quant") of the
# non-spatial (iid) and spatial (ICAR) random effects.
REsnonspat1 <- exp(sea.fit2$summary.random$region[5])
REsspat1 <- exp(sea.fit2$summary.random$region2[5])
seattle$REsnonspat <- REsnonspat1[,1]
seattle$REsspat <- REsspat1[,1]
# Shared diverging palette for the random-effect maps
re_ramp <- colorRampPalette(rev(brewer.pal(8, "RdBu")))(50)
# NOTE(review): the REsnonspat map is drawn twice, as in the original.
spplot(seattle, c("REsnonspat"), col.regions = re_ramp)


## Lognormal spatial model with covariates: non-spatial random effects


spplot(seattle, c("REsnonspat"), col.regions = re_ramp)


## Lognormal spatial model with covariates: spatial random effects
spplot(seattle, c("REsspat"), col.regions = re_ramp)


## Comparison of spatial lognormal and gamma fits: some differences.
## (Column 4 of summary.fitted.values is taken as the fitted value --
## presumably the 0.5quant; confirm against the INLA column order.)
plot(ebresults$RR, sea.fit2$summary.fitted.values[,4],
     xlab = "Gamma fitted", ylab = "Spatial fitted")
abline(0, 1)


## Proportion of Variation that is Spatial
## Monte-Carlo estimate: sample the spatial random effects and the iid
## variance from their posterior marginals and compare their magnitudes.

m <- sea.fit2$marginals.random$region2
nregion <- length(m)  # FIX: derive from the fit instead of hard-coding 132
nsamp <- 1000
mat.marg <- matrix(NA, nrow = nregion, ncol = nsamp)
for (i in seq_len(nregion)) {
  mat.marg[i, ] <- inla.rmarginal(nsamp, m[[i]])
}
# Empirical variance of the sampled spatial surface, per replicate
var.Sre <- apply(mat.marg, 2, var)
# Draws of the iid variance: transform the precision marginal via 1/x
var.eps <- inla.rmarginal(nsamp, inla.tmarginal(function(x) 1/x
                                               ,sea.fit2$marginals.hyper$"Precision for region"))
mean(var.Sre)
## [1] 0.3608812
mean(var.eps)
## [1] 0.3706733
# Fraction of total random-effect variance attributable to the spatial term
perc.var.Sre <- mean(var.Sre/(var.Sre + var.eps))
perc.var.Sre
## [1] 0.5009203
## Uncertainty for the spatial model: RR-scale 95% bounds from the
## posterior quantiles of the linear predictor.

lp2 <- sea.fit2$summary.linear.predictor
LN2lower <- exp(lp2["0.025quant"])
LN2upper <- exp(lp2["0.975quant"])
LN2width <- LN2upper[,1] - LN2lower[,1]


# Compare spatial-model intervals against the SMR intervals
par(mfrow=c(1,3))
plot(LN2lower[,1]~SMRlower1,col="green")
abline(0,1,col="blue")
plot(LN2upper[,1]~SMRupper1,col="green")
abline(0,1,col="blue")
plot(LN2width~SMRwidth1,col="green")
abline(0,1,col="blue")

##########################################################################
# Part 4 Spatial Regression and GWR
##########################################################################
# Spatial Regression, OLS
# 
# Factor-safe numeric coercion of poverty; the warning shows one value fails
# to parse -- NOTE(review): obs 89 is patched by hand further below.
seattle$pov<-as.numeric(as.character(seattle$pov))
## Warning: NAs introduced by coercion
# OLS baseline: robbery rate on poverty, heterogeneity, and no-high-school
sea.ols1 <-lm(rob.p~ pov+hetero+nohs, data=seattle@data)
summary(sea.ols1)
## 
## Call:
## lm(formula = rob.p ~ pov + hetero + nohs, data = seattle@data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -10.273  -1.902  -0.570   0.818  35.266 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -87.82495   45.63807  -1.924   0.0565 .  
## pov           0.43784    0.07244   6.044 1.55e-08 ***
## hetero        0.91961    0.47897   1.920   0.0571 .  
## nohs          0.09123    0.07330   1.245   0.2156    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 5.106 on 127 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.3579, Adjusted R-squared:  0.3428 
## F-statistic:  23.6 on 3 and 127 DF,  p-value: 3.316e-12
# OLS: shoplifting rate on the same covariates
sea.ols2 <-lm(shop.p~ pov+hetero+nohs, data=seattle@data)
summary(sea.ols2)
## 
## Call:
## lm(formula = shop.p ~ pov + hetero + nohs, data = seattle@data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -358.98  -54.89  -14.74   20.43 1464.64 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -3644.3940  1628.0838  -2.238 0.026933 *  
## pov            10.0276     2.5842   3.880 0.000167 ***
## hetero         37.9156    17.0866   2.219 0.028261 *  
## nohs            0.1037     2.6150   0.040 0.968442    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 182.2 on 127 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:   0.16,  Adjusted R-squared:  0.1401 
## F-statistic: 8.062 on 3 and 127 DF,  p-value: 5.859e-05
# OLS: burglary rate on the same covariates
sea.ols3 <-lm(burg.p~ pov+hetero+nohs, data=seattle@data)
summary(sea.ols3)
## 
## Call:
## lm(formula = burg.p ~ pov + hetero + nohs, data = seattle@data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -30.643  -5.590  -1.025   4.263  54.594 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 369.9582    82.4202   4.489 1.59e-05 ***
## pov          -0.1985     0.1308  -1.517 0.131635    
## hetero       -3.5841     0.8650  -4.143 6.20e-05 ***
## nohs          0.5017     0.1324   3.790 0.000232 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 9.221 on 127 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.2723, Adjusted R-squared:  0.2551 
## F-statistic: 15.84 on 3 and 127 DF,  p-value: 8.259e-09
# OLS: property-damage rate on the same covariates
sea.ols4 <-lm(pdam.p~ pov+hetero+nohs, data=seattle@data)
summary(sea.ols4)
## 
## Call:
## lm(formula = pdam.p ~ pov + hetero + nohs, data = seattle@data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -71.641 -12.165  -3.638   3.804 240.357 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -574.7082   298.0909  -1.928   0.0561 .  
## pov            2.6899     0.4731   5.685 8.53e-08 ***
## hetero         6.1413     3.1284   1.963   0.0518 .  
## nohs          -0.1979     0.4788  -0.413   0.6801    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 33.35 on 127 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.262,  Adjusted R-squared:  0.2445 
## F-statistic: 15.03 on 3 and 127 DF,  p-value: 1.985e-08
# OLS: homicide rate on the same covariates
sea.ols5 <-lm(hom.p~ pov+hetero+nohs, data=seattle@data)
summary(sea.ols5)
## 
## Call:
## lm(formula = hom.p ~ pov + hetero + nohs, data = seattle@data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.2201 -0.2326 -0.1073  0.1023  3.6645 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -10.151863   5.208515  -1.949  0.05349 .  
## pov           0.037531   0.008267   4.540 1.29e-05 ***
## hetero        0.105067   0.054663   1.922  0.05683 .  
## nohs          0.025569   0.008366   3.056  0.00273 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.5827 on 127 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.3557, Adjusted R-squared:  0.3405 
## F-statistic: 23.37 on 3 and 127 DF,  p-value: 4.125e-12
# OLS: assault rate on the same covariates
sea.ols6 <-lm(ass.p~ pov+hetero+nohs, data=seattle@data)
summary(sea.ols6)
## 
## Call:
## lm(formula = ass.p ~ pov + hetero + nohs, data = seattle@data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -156.49  -23.16   -5.20   13.77  515.86 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -1996.8341   610.2734  -3.272  0.00138 ** 
## pov             6.4692     0.9687   6.678 6.79e-10 ***
## hetero         20.7985     6.4048   3.247  0.00149 ** 
## nohs           -1.0773     0.9802  -1.099  0.27384    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 68.28 on 127 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.3192, Adjusted R-squared:  0.3031 
## F-statistic: 19.85 on 3 and 127 DF,  p-value: 1.292e-10
# Creating queen-contiguity neighborhoods (regions sharing a side or a vertex)
list.queen<-poly2nb(seattle, queen=TRUE)
# Row-standardised ("W") weights; zero.policy allows regions with no neighbours
W<-nb2listw(list.queen, style="W", zero.policy=TRUE) 
W
## Characteristics of weights list object:
## Neighbour list object:
## Number of regions: 132 
## Number of nonzero links: 732 
## Percentage nonzero weights: 4.201102 
## Average number of links: 5.545455 
## 
## Weights style: W 
## Weights constants summary:
##     n    nn  S0       S1       S2
## W 132 17424 132 50.30071 539.6371
# FIX: compute the polygon centroids once and reuse them (the original
# called coordinates(seattle) a second time inside plot()).
coords<-coordinates(seattle)
plot(W,coords)
# Distance-band neighbours within 1 unit (planar coordinates); kept for later use
W_dist<-dnearneigh(coords,0,1,longlat = FALSE)

# Moran's I on the OLS residuals (robbery model): test for residual
# spatial autocorrelation. (moran.lm is reused for each model below.)
moran.lm<-lm.morantest(sea.ols1, W, alternative="two.sided")
print(moran.lm)
## 
##  Global Moran I for regression residuals
## 
## data:  
## model: lm(formula = rob.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## Moran I statistic standard deviate = 3.0257, p-value = 0.002481
## alternative hypothesis: two.sided
## sample estimates:
## Observed Moran I      Expectation         Variance 
##      0.138983485     -0.018494855      0.002708969
# Moran's I on the shoplifting-model residuals
moran.lm<-lm.morantest(sea.ols2, W, alternative="two.sided")
print(moran.lm)
## 
##  Global Moran I for regression residuals
## 
## data:  
## model: lm(formula = shop.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## Moran I statistic standard deviate = 0.01908, p-value = 0.9848
## alternative hypothesis: two.sided
## sample estimates:
## Observed Moran I      Expectation         Variance 
##     -0.017501790     -0.018494855      0.002708969
# Moran's I on the burglary-model residuals
moran.lm<-lm.morantest(sea.ols3, W, alternative="two.sided")
print(moran.lm)
## 
##  Global Moran I for regression residuals
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## Moran I statistic standard deviate = 5.3356, p-value = 9.522e-08
## alternative hypothesis: two.sided
## sample estimates:
## Observed Moran I      Expectation         Variance 
##      0.259212312     -0.018494855      0.002708969
# Moran's I on the property-damage-model residuals
moran.lm<-lm.morantest(sea.ols4, W, alternative="two.sided")
print(moran.lm)
## 
##  Global Moran I for regression residuals
## 
## data:  
## model: lm(formula = pdam.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## Moran I statistic standard deviate = 3.4875, p-value = 0.0004876
## alternative hypothesis: two.sided
## sample estimates:
## Observed Moran I      Expectation         Variance 
##      0.163020837     -0.018494855      0.002708969
# Moran's I on the homicide-model residuals
moran.lm<-lm.morantest(sea.ols5, W, alternative="two.sided")
print(moran.lm)
## 
##  Global Moran I for regression residuals
## 
## data:  
## model: lm(formula = hom.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## Moran I statistic standard deviate = 1.904, p-value = 0.05691
## alternative hypothesis: two.sided
## sample estimates:
## Observed Moran I      Expectation         Variance 
##      0.080603078     -0.018494855      0.002708969
# Moran's I on the assault-model residuals
moran.lm<-lm.morantest(sea.ols6, W, alternative="two.sided")
print(moran.lm)
## 
##  Global Moran I for regression residuals
## 
## data:  
## model: lm(formula = ass.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## Moran I statistic standard deviate = 5.0652, p-value = 4.08e-07
## alternative hypothesis: two.sided
## sample estimates:
## Observed Moran I      Expectation         Variance 
##      0.245135804     -0.018494855      0.002708969
# Lagrange-Multiplier tests (LMerr, LMlag, robust variants, SARMA)
# to choose between spatial error and spatial lag models -- robbery.
LM1 <-lm.LMtests(sea.ols1, W, test="all")
print(LM1)
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = rob.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMerr = 6.5507, df = 1, p-value = 0.01048
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = rob.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMlag = 17.759, df = 1, p-value = 2.507e-05
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = rob.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMerr = 5.3479, df = 1, p-value = 0.02075
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = rob.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMlag = 16.556, df = 1, p-value = 4.722e-05
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = rob.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## SARMA = 23.107, df = 2, p-value = 9.601e-06
# LM diagnostics for the shoplifting OLS model
LM2 <-lm.LMtests(sea.ols2, W, test="all")
print(LM2)
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = shop.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMerr = 0.10388, df = 1, p-value = 0.7472
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = shop.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMlag = 0.63963, df = 1, p-value = 0.4238
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = shop.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMerr = 11.18, df = 1, p-value = 0.0008268
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = shop.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMlag = 11.716, df = 1, p-value = 0.0006197
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = shop.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## SARMA = 11.82, df = 2, p-value = 0.002713
# LM diagnostics for the burglary OLS model
LM3 <-lm.LMtests(sea.ols3, W, test="all")
print(LM3)
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMerr = 22.786, df = 1, p-value = 1.811e-06
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMlag = 48.846, df = 1, p-value = 2.769e-12
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMerr = 19.196, df = 1, p-value = 1.18e-05
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMlag = 45.255, df = 1, p-value = 1.729e-11
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## SARMA = 68.042, df = 2, p-value = 1.665e-15
# FIX: copy-paste bug -- the original tested sea.ols3 again here (the
# pasted output below therefore repeats the burg.p diagnostics).
# LM4 should diagnose the property-damage model:
LM4 <-lm.LMtests(sea.ols4, W, test="all")
print(LM4)
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMerr = 22.786, df = 1, p-value = 1.811e-06
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMlag = 48.846, df = 1, p-value = 2.769e-12
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMerr = 19.196, df = 1, p-value = 1.18e-05
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMlag = 45.255, df = 1, p-value = 1.729e-11
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## SARMA = 68.042, df = 2, p-value = 1.665e-15
# FIX: copy-paste bug -- the original tested sea.ols3 again here (the
# pasted output below repeats the burg.p diagnostics).
# LM5 should diagnose the homicide model:
LM5 <-lm.LMtests(sea.ols5, W, test="all")
print(LM5)
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMerr = 22.786, df = 1, p-value = 1.811e-06
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMlag = 48.846, df = 1, p-value = 2.769e-12
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMerr = 19.196, df = 1, p-value = 1.18e-05
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMlag = 45.255, df = 1, p-value = 1.729e-11
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## SARMA = 68.042, df = 2, p-value = 1.665e-15
# FIX: copy-paste bug -- the original tested sea.ols3 again here (the
# pasted output below repeats the burg.p diagnostics).
# LM6 should diagnose the assault model:
LM6 <-lm.LMtests(sea.ols6, W, test="all")
print(LM6)
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMerr = 22.786, df = 1, p-value = 1.811e-06
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## LMlag = 48.846, df = 1, p-value = 2.769e-12
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMerr = 19.196, df = 1, p-value = 1.18e-05
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## RLMlag = 45.255, df = 1, p-value = 1.729e-11
## 
## 
##  Lagrange multiplier diagnostics for spatial dependence
## 
## data:  
## model: lm(formula = burg.p ~ pov + hetero + nohs, data =
## seattle@data)
## weights: W
## 
## SARMA = 68.042, df = 2, p-value = 1.665e-15
# Manual patch for the pov value lost to the earlier NA-coercion.
# NOTE(review): 32.101 is hard-coded -- confirm against the source data.
seattle$pov[89]<-32.101 # So strange this data turns to NA

# Spatial lag (SAR) model for the robbery rate, queen weights W
sar.sea1 <-lagsarlm(rob.p ~ pov+nohs+hetero,data=seattle@data, W)
summary(sar.sea1)
## 
## Call:lagsarlm(formula = rob.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -11.17001  -1.67601  -0.60016   0.91783  34.68625 
## 
## Type: lag 
## Coefficients: (asymptotic standard errors) 
##               Estimate Std. Error z value  Pr(>|z|)
## (Intercept) -88.721016  41.324996 -2.1469   0.03180
## pov           0.327384   0.066575  4.9176 8.763e-07
## nohs          0.048263   0.067564  0.7143   0.47502
## hetero        0.923538   0.433590  2.1300   0.03317
## 
## Rho: 0.37481, LR test value: 14.695, p-value: 0.00012635
## Asymptotic standard error: 0.10308
##     z-value: 3.636, p-value: 0.00027686
## Wald statistic: 13.221, p-value: 0.00027686
## 
## Log likelihood: -394.6745 for lag model
## ML residual variance (sigma squared): 22.494, (sigma: 4.7428)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 801.35, (AIC for lm: 814.04)
## LM test for residual autocorrelation
## test value: 6.4542, p-value: 0.011069
# Direct/indirect/total impacts of each covariate implied by the lag model
impacts(sar.sea1, listw=W)
## Impact measures (lag, exact):
##            Direct   Indirect      Total
## pov    0.33755721 0.18609502 0.52365223
## nohs   0.04976294 0.02743427 0.07719721
## hetero 0.95223556 0.52496670 1.47720225
# SAR lag model for the shoplifting rate (zero.policy for no-neighbour regions)
sar.sea2 <-lagsarlm(shop.p ~ pov+nohs+hetero,data=seattle@data, W,zero.policy=TRUE)
summary(sar.sea2)
## 
## Call:
## lagsarlm(formula = shop.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W, zero.policy = TRUE)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -386.838  -51.434  -13.709   17.209 1474.904 
## 
## Type: lag 
## Coefficients: (asymptotic standard errors) 
##                Estimate  Std. Error z value  Pr(>|z|)
## (Intercept) -3729.46101  1558.80474 -2.3925 0.0167334
## pov             8.63876     2.46982  3.4977 0.0004693
## nohs            0.74392     2.50391  0.2971 0.7663878
## hetero         38.83705    16.35337  2.3749 0.0175554
## 
## Rho: 0.10593, LR test value: 0.72978, p-value: 0.39295
## Asymptotic standard error: 0.12878
##     z-value: 0.82255, p-value: 0.41076
## Wald statistic: 0.67659, p-value: 0.41076
## 
## Log likelihood: -871.819 for lag model
## ML residual variance (sigma squared): 31869, (sigma: 178.52)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 1755.6, (AIC for lm: 1754.4)
## LM test for residual autocorrelation
## test value: 10.107, p-value: 0.0014769
impacts(sar.sea2, listw=W)
## Impact measures (lag, exact):
##            Direct   Indirect      Total
## pov     8.6571342 1.00515224  9.6622864
## nohs    0.7455003 0.08655766  0.8320579
## hetero 38.9196598 4.51883758 43.4384974
# Spatial lag model: burglary rate (burg.p) on poverty, no-high-school,
# and heterogeneity.
sar.sea3 <- lagsarlm(burg.p ~ pov + nohs + hetero, data = seattle@data,
                     listw = W)
summary(sar.sea3)
## 
## Call:
## lagsarlm(formula = burg.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -28.33712  -3.95278  -0.31835   4.14938  42.64966 
## 
## Type: lag 
## Coefficients: (asymptotic standard errors) 
##               Estimate Std. Error z value Pr(>|z|)
## (Intercept) 123.098855  66.656578  1.8468  0.06478
## pov          -0.119710   0.103840 -1.1528  0.24898
## nohs          0.076538   0.106514  0.7186  0.47240
## hetero       -1.189823   0.695999 -1.7095  0.08735
## 
## Rho: 0.69327, LR test value: 44.615, p-value: 2.3989e-11
## Asymptotic standard error: 0.074046
##     z-value: 9.3627, p-value: < 2.22e-16
## Wald statistic: 87.661, p-value: < 2.22e-16
## 
## Log likelihood: -462.3302 for lag model
## ML residual variance (sigma squared): 57.307, (sigma: 7.5701)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 936.66, (AIC for lm: 979.27)
## LM test for residual autocorrelation
## test value: 15.449, p-value: 8.4754e-05
# Impact decomposition for the burglary lag model.
impacts(sar.sea3, listw = W)
## Impact measures (lag, exact):
##             Direct   Indirect      Total
## pov    -0.13794036 -0.2523399 -0.3902803
## nohs    0.08819392  0.1613367  0.2495307
## hetero -1.37101470 -2.5080534 -3.8790681
# Spatial lag model: property damage rate (pdam.p) on poverty,
# no-high-school, and heterogeneity.
sar.sea4 <- lagsarlm(pdam.p ~ pov + nohs + hetero, data = seattle@data,
                     listw = W)
summary(sar.sea4)
## 
## Call:
## lagsarlm(formula = pdam.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -68.0480 -11.4860  -3.4172   3.2943 238.3884 
## 
## Type: lag 
## Coefficients: (asymptotic standard errors) 
##               Estimate Std. Error z value Pr(>|z|)
## (Intercept) -589.45496  272.80732 -2.1607  0.03072
## pov            2.10011    0.43995  4.7735 1.81e-06
## nohs          -0.39344    0.43661 -0.9011  0.36752
## hetero         6.22389    2.86391  2.1732  0.02976
## 
## Rho: 0.36715, LR test value: 12.656, p-value: 0.00037435
## Asymptotic standard error: 0.10679
##     z-value: 3.4379, p-value: 0.00058617
## Wald statistic: 11.819, p-value: 0.00058617
## 
## Log likelihood: -643.4249 for lag model
## ML residual variance (sigma squared): 976, (sigma: 31.241)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 1298.8, (AIC for lm: 1309.5)
## LM test for residual autocorrelation
## test value: 1.4295, p-value: 0.23184
# Impact decomposition for the property damage lag model.
impacts(sar.sea4, listw = W)
## Impact measures (lag, exact):
##            Direct  Indirect      Total
## pov     2.1623745  1.156136  3.3185104
## nohs   -0.4051076 -0.216595 -0.6217025
## hetero  6.4084201  3.426328  9.8347483
# Spatial lag model: homicide rate (hom.p) on poverty, no-high-school,
# and heterogeneity.
sar.sea5 <- lagsarlm(hom.p ~ pov + nohs + hetero, data = seattle@data,
                     listw = W)
summary(sar.sea5)
## 
## Call:lagsarlm(formula = hom.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.16607 -0.21533 -0.08941  0.11666  3.63649 
## 
## Type: lag 
## Coefficients: (asymptotic standard errors) 
##                Estimate  Std. Error z value  Pr(>|z|)
## (Intercept) -10.5189476   4.8468353 -2.1703   0.02999
## pov           0.0303040   0.0077116  3.9297 8.506e-05
## nohs          0.0199241   0.0082190  2.4242   0.01534
## hetero        0.1087797   0.0508424  2.1395   0.03239
## 
## Rho: 0.28978, LR test value: 6.8787, p-value: 0.0087229
## Asymptotic standard error: 0.1093
##     z-value: 2.6513, p-value: 0.0080193
## Wald statistic: 7.0292, p-value: 0.0080193
## 
## Log likelihood: -111.0833 for lag model
## ML residual variance (sigma squared): 0.30992, (sigma: 0.55671)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 234.17, (AIC for lm: 239.05)
## LM test for residual autocorrelation
## test value: 2.1175, p-value: 0.14562
# Impact decomposition for the homicide lag model.
impacts(sar.sea5, listw = W)
## Impact measures (lag, exact):
##            Direct   Indirect      Total
## pov    0.03083493 0.01183334 0.04266827
## nohs   0.02027315 0.00778011 0.02805326
## hetero 0.11068544 0.04247711 0.15316255
# Spatial lag model: assault rate (ass.p) on poverty, no-high-school,
# and heterogeneity.
sar.sea6 <- lagsarlm(ass.p ~ pov + nohs + hetero, data = seattle@data,
                     listw = W)
summary(sar.sea6)
## 
## Call:lagsarlm(formula = ass.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -118.5483  -20.7028   -3.9402    9.3888  494.3144 
## 
## Type: lag 
## Coefficients: (asymptotic standard errors) 
##                Estimate  Std. Error z value  Pr(>|z|)
## (Intercept) -1565.41609   523.36931 -2.9910  0.002780
## pov             4.47646     0.84675  5.2866 1.246e-07
## nohs           -0.88677     0.83293 -1.0646  0.287035
## hetero         16.23607     5.49167  2.9565  0.003112
## 
## Rho: 0.48144, LR test value: 26.565, p-value: 2.5477e-07
## Asymptotic standard error: 0.093452
##     z-value: 5.1518, p-value: 2.5806e-07
## Wald statistic: 26.541, p-value: 2.5806e-07
## 
## Log likelihood: -730.1736 for lag model
## ML residual variance (sigma squared): 3552.6, (sigma: 59.604)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 1472.3, (AIC for lm: 1496.9)
## LM test for residual autocorrelation
## test value: 1.5006, p-value: 0.22057
# Impact decomposition for the assault lag model.
impacts(sar.sea6, listw = W)
## Impact measures (lag, exact):
##            Direct   Indirect     Total
## pov     4.7276646  3.9048306  8.632495
## nohs   -0.9365364 -0.7735354 -1.710072
## hetero 17.1471792 14.1627708 31.309950
# Spatial Error Models ----
# Refit each outcome with a spatial error specification for comparison
# against the lag models above (compare AICs and Lambda/Rho tests).

# Spatial error model: robbery rate (rob.p).
sar.err1 <- errorsarlm(rob.p ~ pov + nohs + hetero, data = seattle@data,
                       listw = W)
summary(sar.err1)
## 
## Call:
## errorsarlm(formula = rob.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -9.68969 -1.76715 -0.57410  0.79482 35.98544 
## 
## Type: error 
## Coefficients: (asymptotic standard errors) 
##               Estimate Std. Error z value  Pr(>|z|)
## (Intercept) -77.179477  47.003480 -1.6420   0.10059
## pov           0.344465   0.073958  4.6576 3.199e-06
## nohs          0.069371   0.082943  0.8364   0.40295
## hetero        0.817364   0.493347  1.6568   0.09757
## 
## Lambda: 0.36313, LR test value: 8.2639, p-value: 0.0040442
## Asymptotic standard error: 0.11557
##     z-value: 3.1421, p-value: 0.0016773
## Wald statistic: 9.873, p-value: 0.0016773
## 
## Log likelihood: -397.8903 for error model
## ML residual variance (sigma squared): 23.662, (sigma: 4.8644)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 807.78, (AIC for lm: 814.04)
# Spatial error model: shoplifting rate (shop.p). zero.policy = TRUE as in
# the matching lag fit (sar.sea2).
sar.err2 <- errorsarlm(shop.p ~ pov + nohs + hetero, data = seattle@data,
                       listw = W, zero.policy = TRUE)
summary(sar.err2)
## 
## Call:
## errorsarlm(formula = shop.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W, zero.policy = TRUE)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -355.209  -56.245  -15.171   24.048 1461.731 
## 
## Type: error 
## Coefficients: (asymptotic standard errors) 
##                Estimate  Std. Error z value  Pr(>|z|)
## (Intercept) -4043.22055  1545.92646 -2.6154  0.008912
## pov             9.66431     2.42727  3.9816 6.846e-05
## nohs            0.50021     2.46796  0.2027  0.839384
## hetero         42.12084    16.21484  2.5977  0.009386
## 
## Lambda: -0.02752, LR test value: 0.031519, p-value: 0.85909
## Asymptotic standard error: 0.14225
##     z-value: -0.19346, p-value: 0.8466
## Wald statistic: 0.037427, p-value: 0.8466
## 
## Log likelihood: -872.1681 for error model
## ML residual variance (sigma squared): 32101, (sigma: 179.17)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 1756.3, (AIC for lm: 1754.4)
# Spatial error model: burglary rate (burg.p).
sar.err3 <- errorsarlm(burg.p ~ pov + nohs + hetero, data = seattle@data,
                       listw = W)
summary(sar.err3)
## 
## Call:
## errorsarlm(formula = burg.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -24.82863  -4.19759   0.14216   4.16769  31.45929 
## 
## Type: error 
## Coefficients: (asymptotic standard errors) 
##              Estimate Std. Error z value  Pr(>|z|)
## (Intercept) 11.858346  74.236568  0.1597 0.8730880
## pov          0.014258   0.114123  0.1249 0.9005744
## nohs        -0.565696   0.150868 -3.7496 0.0001771
## hetero       0.230788   0.779624  0.2960 0.7672115
## 
## Lambda: 0.85706, LR test value: 53.376, p-value: 2.7545e-13
## Asymptotic standard error: 0.045248
##     z-value: 18.941, p-value: < 2.22e-16
## Wald statistic: 358.78, p-value: < 2.22e-16
## 
## Log likelihood: -457.9496 for error model
## ML residual variance (sigma squared): 48.663, (sigma: 6.9759)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 927.9, (AIC for lm: 979.27)
# Spatial error model: property damage rate (pdam.p).
sar.err4 <- errorsarlm(pdam.p ~ pov + nohs + hetero, data = seattle@data,
                       listw = W)
summary(sar.err4)
## 
## Call:
## errorsarlm(formula = pdam.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -61.5427 -12.4335  -4.2243   3.4732 239.0113 
## 
## Type: error 
## Coefficients: (asymptotic standard errors) 
##               Estimate Std. Error z value  Pr(>|z|)
## (Intercept) -581.75509  306.90186 -1.8956   0.05802
## pov            2.28009    0.48269  4.7237 2.316e-06
## nohs          -0.64576    0.54741 -1.1797   0.23814
## hetero         6.27482    3.22153  1.9478   0.05144
## 
## Lambda: 0.39785, LR test value: 9.9265, p-value: 0.0016291
## Asymptotic standard error: 0.1122
##     z-value: 3.5459, p-value: 0.00039125
## Wald statistic: 12.574, p-value: 0.00039125
## 
## Log likelihood: -644.7896 for error model
## ML residual variance (sigma squared): 991.19, (sigma: 31.483)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 1301.6, (AIC for lm: 1309.5)
# Spatial error model: homicide rate (hom.p).
sar.err5 <- errorsarlm(hom.p ~ pov + nohs + hetero, data = seattle@data,
                       listw = W)
summary(sar.err5)
## 
## Call:
## errorsarlm(formula = hom.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.12057 -0.23700 -0.10207  0.10391  3.69915 
## 
## Type: error 
## Coefficients: (asymptotic standard errors) 
##               Estimate Std. Error z value  Pr(>|z|)
## (Intercept) -9.8116272  5.2904557 -1.8546 0.0636548
## pov          0.0318036  0.0083283  3.8187 0.0001341
## nohs         0.0256857  0.0089878  2.8578 0.0042653
## hetero       0.1019978  0.0555118  1.8374 0.0661500
## 
## Lambda: 0.23256, LR test value: 2.7998, p-value: 0.094274
## Asymptotic standard error: 0.1267
##     z-value: 1.8355, p-value: 0.06643
## Wald statistic: 3.3691, p-value: 0.06643
## 
## Log likelihood: -113.1228 for error model
## ML residual variance (sigma squared): 0.32162, (sigma: 0.56711)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 238.25, (AIC for lm: 239.05)
# Spatial error model: assault rate (ass.p).
sar.err6 <- errorsarlm(ass.p ~ pov + nohs + hetero, data = seattle@data,
                       listw = W)
summary(sar.err6)
## 
## Call:
## errorsarlm(formula = ass.p ~ pov + nohs + hetero, data = seattle@data, 
##     listw = W)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -97.7023 -20.0637  -4.8453   7.5729 516.6583 
## 
## Type: error 
## Coefficients: (asymptotic standard errors) 
##                Estimate  Std. Error z value  Pr(>|z|)
## (Intercept) -1450.95068   609.17561 -2.3818   0.01723
## pov             4.62517     0.95572  4.8394 1.302e-06
## nohs           -0.88634     1.12656 -0.7868   0.43142
## hetero         15.20670     6.39668  2.3773   0.01744
## 
## Lambda: 0.51072, LR test value: 20.197, p-value: 6.9861e-06
## Asymptotic standard error: 0.099932
##     z-value: 5.1107, p-value: 3.21e-07
## Wald statistic: 26.119, p-value: 3.21e-07
## 
## Log likelihood: -733.3578 for error model
## ML residual variance (sigma squared): 3701.7, (sigma: 60.842)
## Number of observations: 132 
## Number of parameters estimated: 6 
## AIC: 1478.7, (AIC for lm: 1496.9)